bitkeeper revision 1.1236.32.12 (423c3be1O7_a1yKODsN2egtVDG1low)
author mafetter@fleming.research <mafetter@fleming.research>
Sat, 19 Mar 2005 14:49:05 +0000 (14:49 +0000)
committer mafetter@fleming.research <mafetter@fleming.research>
Sat, 19 Mar 2005 14:49:05 +0000 (14:49 +0000)
Christian's changes for linux 2.6.10 so that it boots in shadow mode
with translation enabled.

Signed-off-by: michael.fetterman@cl.cam.ac.uk
28 files changed:
linux-2.6.10-xen-sparse/arch/xen/configs/xen0_defconfig
linux-2.6.10-xen-sparse/arch/xen/i386/kernel/cpu/common.c
linux-2.6.10-xen-sparse/arch/xen/i386/kernel/ldt.c
linux-2.6.10-xen-sparse/arch/xen/i386/kernel/pci-dma.c
linux-2.6.10-xen-sparse/arch/xen/i386/kernel/process.c
linux-2.6.10-xen-sparse/arch/xen/i386/kernel/setup.c
linux-2.6.10-xen-sparse/arch/xen/i386/mm/fault.c
linux-2.6.10-xen-sparse/arch/xen/i386/mm/hypervisor.c
linux-2.6.10-xen-sparse/arch/xen/i386/mm/init.c
linux-2.6.10-xen-sparse/arch/xen/i386/mm/ioremap.c
linux-2.6.10-xen-sparse/arch/xen/i386/mm/pageattr.c
linux-2.6.10-xen-sparse/arch/xen/i386/mm/pgtable.c
linux-2.6.10-xen-sparse/arch/xen/kernel/reboot.c
linux-2.6.10-xen-sparse/drivers/xen/balloon/balloon.c
linux-2.6.10-xen-sparse/drivers/xen/blkback/blkback.c
linux-2.6.10-xen-sparse/drivers/xen/blkfront/blkfront.c
linux-2.6.10-xen-sparse/drivers/xen/blkfront/vbd.c
linux-2.6.10-xen-sparse/drivers/xen/netback/netback.c
linux-2.6.10-xen-sparse/drivers/xen/netfront/netfront.c
linux-2.6.10-xen-sparse/drivers/xen/privcmd/privcmd.c
linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/desc.h
linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/fixmap.h
linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/io.h
linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/page.h
linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/pgalloc.h
linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/pgtable.h
xen/include/public/arch-x86_32.h

index 408113f9201e96bd766be90b520683fbb03469e8..f518333013c3f9e5b88965e63dccf77410b8af98 100644 (file)
@@ -12,9 +12,9 @@ CONFIG_NO_IDLE_HZ=y
 #
 CONFIG_XEN_PRIVILEGED_GUEST=y
 CONFIG_XEN_PHYSDEV_ACCESS=y
-CONFIG_XEN_BLKDEV_BACKEND=y
+# CONFIG_XEN_BLKDEV_BACKEND is not set
 # CONFIG_XEN_BLKDEV_TAP_BE is not set
-CONFIG_XEN_NETDEV_BACKEND=y
+# CONFIG_XEN_NETDEV_BACKEND is not set
 CONFIG_XEN_BLKDEV_FRONTEND=y
 CONFIG_XEN_NETDEV_FRONTEND=y
 # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
index 750524cfcfc3ef0a3c71a22eb4985e89b5902bed..dc66f11e3ffffc58b6f1dd4274c4fd1495b8e0f0 100644 (file)
@@ -512,7 +512,7 @@ void __init cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
        for (va = gdt_descr->address, f = 0;
             va < gdt_descr->address + gdt_descr->size;
             va += PAGE_SIZE, f++) {
-               frames[f] = virt_to_machine(va) >> PAGE_SHIFT;
+               frames[f] = __vms_virt_to_machine(va) >> PAGE_SHIFT;
                make_page_readonly((void *)va);
        }
        flush_page_update_queue();
index 53f031503935dd7fca22ccb417b189d8e8714cf4..426f8be75a23f568981f7f9b968dacf791c1a6ac 100644 (file)
@@ -61,8 +61,10 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
                cpumask_t mask;
                preempt_disable();
 #endif
+#if 0
                make_pages_readonly(pc->ldt, (pc->size * LDT_ENTRY_SIZE) /
                                    PAGE_SIZE);
+#endif
                load_LDT(pc);
                flush_page_update_queue();
 #ifdef CONFIG_SMP
@@ -73,8 +75,10 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 #endif
        }
        if (oldsize) {
+#if 0
                make_pages_writable(oldldt, (oldsize * LDT_ENTRY_SIZE) /
                        PAGE_SIZE);
+#endif
                flush_page_update_queue();
                if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
                        vfree(oldldt);
@@ -90,8 +94,10 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
        if (err < 0)
                return err;
        memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
+#if 0
        make_pages_readonly(new->ldt, (new->size * LDT_ENTRY_SIZE) /
                            PAGE_SIZE);
+#endif
        flush_page_update_queue();
        return 0;
 }
@@ -124,9 +130,11 @@ void destroy_context(struct mm_struct *mm)
        if (mm->context.size) {
                if (mm == current->active_mm)
                        clear_LDT();
+#if 0
                make_pages_writable(mm->context.ldt, 
                                    (mm->context.size * LDT_ENTRY_SIZE) /
                                    PAGE_SIZE);
+#endif
                flush_page_update_queue();
                if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
                        vfree(mm->context.ldt);
@@ -222,7 +230,7 @@ static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
        }
 
        lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
-       mach_lp = arbitrary_virt_to_machine(lp);
+       mach_lp = arbitrary_virt_to_phys(lp);
 
        /* Allow LDTs to be cleared by the user. */
        if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
index c3462814051a0fe8bdf74d8d55ca5af5d76e3a7d..47e00650fd9e33f40006fb141b4ab9eda4241a7d 100644 (file)
@@ -30,6 +30,8 @@ struct dma_coherent_mem {
 static void
 xen_contig_memory(unsigned long vstart, unsigned int order)
 {
+#define HACK
+#ifndef HACK
        /*
         * Ensure multi-page extents are contiguous in machine memory.
         * This code could be cleaned up some, and the number of
@@ -76,6 +78,7 @@ xen_contig_memory(unsigned long vstart, unsigned int order)
        xen_tlb_flush();
 
         balloon_unlock(flags);
+#endif
 }
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
index 017b167c388f91f97ae44e1d439fc4fd4edd8a59..3af462f874beaa72c1148391987a0ff81dc7830a 100644 (file)
@@ -518,7 +518,7 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
        if (unlikely(next->tls_array[i].a != prev->tls_array[i].a ||        \
                     next->tls_array[i].b != prev->tls_array[i].b))         \
                queue_multicall3(__HYPERVISOR_update_descriptor,            \
-                                virt_to_machine(&get_cpu_gdt_table(cpu)    \
+                                virt_to_phys(&get_cpu_gdt_table(cpu)       \
                                                 [GDT_ENTRY_TLS_MIN + i]),  \
                                 ((u32 *)&next->tls_array[i])[0],           \
                                 ((u32 *)&next->tls_array[i])[1]);          \
index 5285439c364b153a8d72e27fa434b123bdeba439..f4e25353d292b3b91db6d7cd9706809b20318458 100644 (file)
@@ -345,8 +345,8 @@ static void __init probe_roms(void)
 shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
 EXPORT_SYMBOL(HYPERVISOR_shared_info);
 
-unsigned int *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
-EXPORT_SYMBOL(phys_to_machine_mapping);
+unsigned int *__vms_phys_to_machine_mapping, *__vms_pfn_to_mfn_frame_list;
+EXPORT_SYMBOL(__vms_phys_to_machine_mapping);
 
 DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
 DEFINE_PER_CPU(int, nr_multicall_ents);
@@ -1142,7 +1142,7 @@ static unsigned long __init setup_memory(void)
        }
 #endif
 
-       phys_to_machine_mapping = (unsigned int *)xen_start_info.mfn_list;
+       __vms_phys_to_machine_mapping = (unsigned int *)xen_start_info.mfn_list;
 
        return max_low_pfn;
 }
@@ -1437,11 +1437,11 @@ void __init setup_arch(char **cmdline_p)
 
        /* Make sure we have a large enough P->M table. */
        if (max_pfn > xen_start_info.nr_pages) {
-               phys_to_machine_mapping = alloc_bootmem_low_pages(
+               __vms_phys_to_machine_mapping = alloc_bootmem_low_pages(
                        max_pfn * sizeof(unsigned long));
-               memset(phys_to_machine_mapping, ~0,
+               memset(__vms_phys_to_machine_mapping, ~0,
                        max_pfn * sizeof(unsigned long));
-               memcpy(phys_to_machine_mapping,
+               memcpy(__vms_phys_to_machine_mapping,
                        (unsigned long *)xen_start_info.mfn_list,
                        xen_start_info.nr_pages * sizeof(unsigned long));
                free_bootmem(
@@ -1450,14 +1450,14 @@ void __init setup_arch(char **cmdline_p)
                        sizeof(unsigned long))));
        }
 
-       pfn_to_mfn_frame_list = alloc_bootmem_low_pages(PAGE_SIZE);
+       __vms_pfn_to_mfn_frame_list = alloc_bootmem_low_pages(PAGE_SIZE);
        for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
        {       
-            pfn_to_mfn_frame_list[j] = 
-                 virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
+            __vms_pfn_to_mfn_frame_list[j] = 
+                 __vms_virt_to_machine(&__vms_phys_to_machine_mapping[i]) >> PAGE_SHIFT;
        }
        HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
-            virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
+            __vms_virt_to_machine(__vms_pfn_to_mfn_frame_list) >> PAGE_SHIFT;
 
 
        /*
index 416b2be163ee60f108e73999ba67b8d1ee1da4f6..288ceade52ce3f9bfe16bbda7e72341cd369c645 100644 (file)
@@ -458,8 +458,8 @@ no_context:
        printk("%08lx\n", regs->eip);
        page = ((unsigned long *) per_cpu(cur_pgd, smp_processor_id()))
            [address >> 22];
-       printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
-              machine_to_phys(page));
+       printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n",
+              __vms_phys_to_machine(page), page);
        /*
         * We must not directly access the pte in the highpte
         * case, the page table might be allocated in highmem.
@@ -470,10 +470,9 @@ no_context:
        if (page & 1) {
                page &= PAGE_MASK;
                address &= 0x003ff000;
-               page = machine_to_phys(page);
                page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
-               printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
-                      machine_to_phys(page));
+               printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n",
+                      __vms_phys_to_machine(page), page);
        }
 #endif
        show_trace(NULL, (unsigned long *)&regs[1]);
index a6cbb32231051d668062499af9e2fa5b6abf1b51..d7d48bdfe881983a5be5391cab5fd81ba7214065 100644 (file)
@@ -56,7 +56,7 @@ static spinlock_t update_lock = SPIN_LOCK_UNLOCKED;
 #ifdef CONFIG_SMP
 #define QUEUE_SIZE 1
 #else
-#define QUEUE_SIZE 128
+#define QUEUE_SIZE 1
 #endif
 #endif
 
@@ -125,14 +125,12 @@ static inline void increment_index_and_flush(void)
 
 void queue_l1_entry_update(pte_t *ptr, unsigned long val)
 {
-    _flush_page_update_queue();
-    *(unsigned long *)ptr = val;
+    set_pte(ptr, __pte(val));
 }
 
 void queue_l2_entry_update(pmd_t *ptr, unsigned long val)
 {
-    _flush_page_update_queue();
-    *(unsigned long *)ptr = val;
+    set_pmd(ptr, __pmd(val));
 }
 
 void queue_pt_switch(unsigned long ptr)
@@ -142,7 +140,7 @@ void queue_pt_switch(unsigned long ptr)
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
     idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).ptr  = __vms_phys_to_machine(ptr);
     per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
     per_cpu(update_queue[idx], cpu).val  = MMUEXT_NEW_BASEPTR;
     increment_index();
@@ -176,56 +174,56 @@ void queue_invlpg(unsigned long ptr)
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
-void queue_pgd_pin(unsigned long ptr)
+void __vms_queue_pgd_pin(unsigned long ptr)
 {
     int cpu = smp_processor_id();
     int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
     idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).ptr  = __vms_phys_to_machine(ptr);
     per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
     per_cpu(update_queue[idx], cpu).val  = MMUEXT_PIN_L2_TABLE;
     increment_index();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
-void queue_pgd_unpin(unsigned long ptr)
+void __vms_queue_pgd_unpin(unsigned long ptr)
 {
     int cpu = smp_processor_id();
     int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
     idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).ptr  = __vms_phys_to_machine(ptr);
     per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
     per_cpu(update_queue[idx], cpu).val  = MMUEXT_UNPIN_TABLE;
     increment_index();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
-void queue_pte_pin(unsigned long ptr)
+void __vms_queue_pte_pin(unsigned long ptr)
 {
     int cpu = smp_processor_id();
     int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
     idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).ptr  = __vms_phys_to_machine(ptr);
     per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
     per_cpu(update_queue[idx], cpu).val  = MMUEXT_PIN_L1_TABLE;
     increment_index();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
-void queue_pte_unpin(unsigned long ptr)
+void __vms_queue_pte_unpin(unsigned long ptr)
 {
     int cpu = smp_processor_id();
     int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
     idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).ptr  = __vms_phys_to_machine(ptr);
     per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
     per_cpu(update_queue[idx], cpu).val  = MMUEXT_UNPIN_TABLE;
     increment_index();
@@ -261,12 +259,12 @@ void queue_machphys_update(unsigned long mfn, unsigned long pfn)
 /* queue and flush versions of the above */
 void xen_l1_entry_update(pte_t *ptr, unsigned long val)
 {
-    *(unsigned long *)ptr = val;
+    set_pte(ptr, __pte(val));
 }
 
 void xen_l2_entry_update(pmd_t *ptr, unsigned long val)
 {
-    *(unsigned long *)ptr = val;
+    set_pmd(ptr, __pmd(val));
 }
 
 void xen_pt_switch(unsigned long ptr)
@@ -276,7 +274,7 @@ void xen_pt_switch(unsigned long ptr)
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
     idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).ptr  = __vms_phys_to_machine(ptr);
     per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
     per_cpu(update_queue[idx], cpu).val  = MMUEXT_NEW_BASEPTR;
     increment_index_and_flush();
@@ -310,56 +308,56 @@ void xen_invlpg(unsigned long ptr)
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
-void xen_pgd_pin(unsigned long ptr)
+void __vms_xen_pgd_pin(unsigned long ptr)
 {
     int cpu = smp_processor_id();
     int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
     idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).ptr  = __vms_phys_to_machine(ptr);
     per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
     per_cpu(update_queue[idx], cpu).val  = MMUEXT_PIN_L2_TABLE;
     increment_index_and_flush();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
-void xen_pgd_unpin(unsigned long ptr)
+void __vms_xen_pgd_unpin(unsigned long ptr)
 {
     int cpu = smp_processor_id();
     int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
     idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).ptr  = __vms_phys_to_machine(ptr);
     per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
     per_cpu(update_queue[idx], cpu).val  = MMUEXT_UNPIN_TABLE;
     increment_index_and_flush();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
-void xen_pte_pin(unsigned long ptr)
+void __vms_xen_pte_pin(unsigned long ptr)
 {
     int cpu = smp_processor_id();
     int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
     idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).ptr  = __vms_phys_to_machine(ptr);
     per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
     per_cpu(update_queue[idx], cpu).val  = MMUEXT_PIN_L1_TABLE;
     increment_index_and_flush();
     spin_unlock_irqrestore(&update_lock, flags);
 }
 
-void xen_pte_unpin(unsigned long ptr)
+void __vms_xen_pte_unpin(unsigned long ptr)
 {
     int cpu = smp_processor_id();
     int idx;
     unsigned long flags;
     spin_lock_irqsave(&update_lock, flags);
     idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
+    per_cpu(update_queue[idx], cpu).ptr  = __vms_phys_to_machine(ptr);
     per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
     per_cpu(update_queue[idx], cpu).val  = MMUEXT_UNPIN_TABLE;
     increment_index_and_flush();
@@ -421,7 +419,7 @@ unsigned long allocate_empty_lowmem_region(unsigned long pages)
         pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE))); 
         pfn_array[i] = pte->pte_low >> PAGE_SHIFT;
         queue_l1_entry_update(pte, 0);
-        phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = INVALID_P2M_ENTRY;
+        __vms_phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = INVALID_P2M_ENTRY;
     }
 
     /* Flush updates through and flush the TLB. */
index b104145e11eb70ffc88e31b68ad1c89ed58fe444..e3bdfeead1150022febf4becbedef6da4d288af5 100644 (file)
@@ -349,12 +349,9 @@ static void __init pagetable_init (void)
         * it. We clean up by write-enabling and then freeing the old page dir.
         */
        memcpy(new_pgd, old_pgd, PTRS_PER_PGD_NO_HV*sizeof(pgd_t));
-       //make_page_readonly(new_pgd);
-       queue_pgd_pin(__pa(new_pgd));
        load_cr3(new_pgd);
-       queue_pgd_unpin(__pa(old_pgd));
        __flush_tlb_all(); /* implicit flush */
-       make_page_writable(old_pgd);
+       //make_page_writable(old_pgd);
        flush_page_update_queue();
        free_bootmem(__pa(old_pgd), PAGE_SIZE);
 
@@ -562,7 +559,8 @@ void __init paging_init(void)
 
        /* Switch to the real shared_info page, and clear the dummy page. */
        flush_page_update_queue();
-       set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info);
+        printk("xen_start_info.shared_info=%x\n", xen_start_info.shared_info);
+       set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
        HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
        memset(empty_zero_page, 0, sizeof(empty_zero_page));
 
@@ -570,10 +568,10 @@ void __init paging_init(void)
        /* Setup mapping of lower 1st MB */
        for (i = 0; i < NR_FIX_ISAMAPS; i++)
                if (xen_start_info.flags & SIF_PRIVILEGED)
-                       set_fixmap_ma(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
+                       __vms_set_fixmap_ma(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
                else
-                       set_fixmap_ma_ro(FIX_ISAMAP_BEGIN - i,
-                                        virt_to_machine(empty_zero_page));
+                       __vms_set_fixmap_ma_ro(FIX_ISAMAP_BEGIN - i,
+                                        __vms_virt_to_machine(empty_zero_page));
 #endif
 }
 
index efd4663bcc46b293390f7f973961cb85ffe4a9cf..19e103ac4ab4b05f4f4eb2fc3b82aba2ce380ceb 100644 (file)
@@ -56,9 +56,8 @@ void __init bt_iounmap(void *addr, unsigned long size)
 static inline int is_local_lowmem(unsigned long address)
 {
        extern unsigned long max_low_pfn;
-       unsigned long mfn = address >> PAGE_SHIFT;
-       unsigned long pfn = mfn_to_pfn(mfn);
-       return ((pfn < max_low_pfn) && (pfn_to_mfn(pfn) == mfn));
+       unsigned long pfn = address >> PAGE_SHIFT;
+       return (pfn < max_low_pfn);
 }
 
 /*
@@ -97,6 +96,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
+#if 0
        if (is_local_lowmem(phys_addr)) {
                char *t_addr, *t_end;
                struct page *page;
@@ -110,6 +110,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 
                domid = DOMID_LOCAL;
        }
+#endif
 
        /*
         * Mappings have to be page-aligned
@@ -255,7 +256,7 @@ void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
         */
        idx = FIX_BTMAP_BEGIN;
        while (nrpages > 0) {
-               set_fixmap_ma(idx, phys_addr);
+               __vms_set_fixmap_ma(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
@@ -312,7 +313,7 @@ static inline void direct_remap_area_pte(pte_t *pte,
                BUG();
 
        do {
-               (*v)->ptr = virt_to_machine(pte);
+               (*v)->ptr = __vms_virt_to_machine(pte);
                (*v)++;
                address += PAGE_SIZE;
                pte++;
@@ -386,7 +387,7 @@ int direct_remap_area_pages(struct mm_struct *mm,
        mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
 
        v = w = &u[0];
-       if (domid != DOMID_LOCAL) {
+       if (0 && domid != DOMID_LOCAL) {
                u[0].ptr  = MMU_EXTENDED_COMMAND;
                u[0].val  = MMUEXT_SET_FOREIGNDOM;
                u[0].val |= (unsigned long)domid << 16;
@@ -415,8 +416,19 @@ int direct_remap_area_pages(struct mm_struct *mm,
                 * Fill in the machine address: PTE ptr is done later by
                 * __direct_remap_area_pages(). 
                 */
-               v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);
-
+        {
+            mmu_update_t update;
+            int success = 0;
+            unsigned long ppfn;
+
+            update.ptr = (machine_addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
+            update.val = -1;
+            ppfn = HYPERVISOR_mmu_update(&update, 1, &success);
+            if (! success)
+                BUG();
+                
+               v->val = (ppfn << PAGE_SHIFT) | pgprot_val(prot);
+        }
                machine_addr += PAGE_SIZE;
                address += PAGE_SIZE; 
                v++;
index 7c961c7a7d9f730571a2554361e2a81d034f8f4c..051c66d5b25347157b109730905f4efe2a84056d 100644 (file)
@@ -119,7 +119,7 @@ __change_page_attr(struct page *page, pgprot_t prot)
                if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
                        pte_t old = *kpte;
                        pte_t standard = mk_pte(page, PAGE_KERNEL); 
-                       set_pte_batched(kpte, mk_pte(page, prot)); 
+                       set_pte_atomic(kpte, mk_pte(page, prot)); 
                        if (pte_same(old,standard))
                                get_page(kpte_page);
                } else {
@@ -130,7 +130,7 @@ __change_page_attr(struct page *page, pgprot_t prot)
                        set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
                }       
        } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
-               set_pte_batched(kpte, mk_pte(page, PAGE_KERNEL));
+               set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
                __put_page(kpte_page);
        }
 
@@ -171,7 +171,6 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot)
                if (err) 
                        break; 
        }       
-       flush_page_update_queue();
        spin_unlock_irqrestore(&cpa_lock, flags);
        return err;
 }
index e83f45c05de9436a4e868222758485205c816a62..c6fb1480a2a290eaea4d59da33bcfbe75c008ae1 100644 (file)
@@ -93,7 +93,7 @@ static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
  * Associate a virtual page frame with a given physical page frame 
  * and protection flags for that frame.
  */ 
-static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
+static void __vms_set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
                           pgprot_t flags)
 {
        pgd_t *pgd;
@@ -112,7 +112,18 @@ static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
        }
        pte = pte_offset_kernel(pmd, vaddr);
        /* <pfn,flags> stored as-is, to permit clearing entries */
-       set_pte(pte, pfn_pte_ma(pfn, flags));
+        {
+            mmu_update_t update;
+            int success = 0;
+            unsigned long ppfn;
+
+            update.ptr = (pfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+            update.val = -1;
+            ppfn = HYPERVISOR_mmu_update(&update, 1, &success);
+            if (! success)
+                BUG();
+            set_pte(pte, pfn_pte(ppfn, flags));
+        }
 
        /*
         * It's enough to flush this one mapping.
@@ -165,7 +176,7 @@ void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
        set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
 }
 
-void __set_fixmap_ma (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
+void __vms___set_fixmap_ma (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
 {
        unsigned long address = __fix_to_virt(idx);
 
@@ -173,7 +184,7 @@ void __set_fixmap_ma (enum fixed_addresses idx, unsigned long phys, pgprot_t fla
                BUG();
                return;
        }
-       set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
+       __vms_set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
 }
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
@@ -181,8 +192,6 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
        if (pte) {
                clear_page(pte);
-               //make_page_readonly(pte);
-               xen_flush_page_update_queue();
        }
        return pte;
 }
@@ -194,9 +203,6 @@ void pte_ctor(void *pte, kmem_cache_t *cache, unsigned long unused)
        set_page_count(page, 1);
 
        clear_page(pte);
-       //make_page_readonly(pte);
-       queue_pte_pin(__pa(pte));
-       flush_page_update_queue();
 }
 
 void pte_dtor(void *pte, kmem_cache_t *cache, unsigned long unused)
@@ -204,9 +210,6 @@ void pte_dtor(void *pte, kmem_cache_t *cache, unsigned long unused)
        struct page *page = virt_to_page(pte);
        ClearPageForeign(page);
 
-       queue_pte_unpin(__pa(pte));
-       make_page_writable(pte);
-       flush_page_update_queue();
 }
 
 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
@@ -239,7 +242,7 @@ void pte_free(struct page *pte)
        if (pte < highmem_start_page)
 #endif
                kmem_cache_free(pte_cache,
-                               phys_to_virt(page_to_pseudophys(pte)));
+                               phys_to_virt(__vms_page_to_pseudophys(pte)));
 #ifdef CONFIG_HIGHPTE
        else
                __free_page(pte);
@@ -304,9 +307,7 @@ void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
        spin_unlock_irqrestore(&pgd_lock, flags);
        memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
  out:
-       //make_page_readonly(pgd);
-       queue_pgd_pin(__pa(pgd));
-       flush_page_update_queue();
+       ;
 }
 
 /* never called when PTRS_PER_PMD > 1 */
@@ -314,10 +315,6 @@ void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
 {
        unsigned long flags; /* can be called from interrupt context */
 
-       queue_pgd_unpin(__pa(pgd));
-       make_page_writable(pgd);
-       flush_page_update_queue();
-
        if (PTRS_PER_PMD > 1)
                return;
 
@@ -383,15 +380,17 @@ void make_page_readonly(void *va)
        pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
        pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
        queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
+#if 0
        if ( (unsigned long)va >= (unsigned long)high_memory )
        {
                unsigned long phys;
-               phys = machine_to_phys(*(unsigned long *)pte & PAGE_MASK);
+               phys = __vms_machine_to_phys(*(unsigned long *)pte & PAGE_MASK);
 #ifdef CONFIG_HIGHMEM
                if ( (phys >> PAGE_SHIFT) < highstart_pfn )
 #endif
                        make_lowmem_page_readonly(phys_to_virt(phys));
        }
+#endif
 }
 
 void make_page_writable(void *va)
@@ -403,7 +402,7 @@ void make_page_writable(void *va)
        if ( (unsigned long)va >= (unsigned long)high_memory )
        {
                unsigned long phys;
-               phys = machine_to_phys(*(unsigned long *)pte & PAGE_MASK);
+               phys = __vms_machine_to_phys(*(unsigned long *)pte & PAGE_MASK);
 #ifdef CONFIG_HIGHMEM
                if ( (phys >> PAGE_SHIFT) < highstart_pfn )
 #endif
index f69db851a4fc77b310c6df7ef01f550fc7110e66..c0f2ece9cba8ed70168eaeddb5125bfdc7ba4254 100644 (file)
@@ -80,7 +80,7 @@ static void __do_suspend(void)
     extern void time_suspend(void);
     extern void time_resume(void);
     extern unsigned long max_pfn;
-    extern unsigned int *pfn_to_mfn_frame_list;
+    extern unsigned int *__vms_pfn_to_mfn_frame_list;
 
     suspend_record = (suspend_record_t *)__get_free_page(GFP_KERNEL);
     if ( suspend_record == NULL )
@@ -105,7 +105,7 @@ static void __do_suspend(void)
 
     memcpy(&suspend_record->resume_info, &xen_start_info, sizeof(xen_start_info));
 
-    HYPERVISOR_suspend(virt_to_machine(suspend_record) >> PAGE_SHIFT);
+    HYPERVISOR_suspend(__vms_virt_to_machine(suspend_record) >> PAGE_SHIFT);
 
     HYPERVISOR_vm_assist(VMASST_CMD_enable,
                         VMASST_TYPE_4gb_segments);
@@ -118,11 +118,7 @@ static void __do_suspend(void)
 
     memcpy(&xen_start_info, &suspend_record->resume_info, sizeof(xen_start_info));
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-    set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info);
-#else
     set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
-#endif
 
     HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
 
@@ -130,11 +126,11 @@ static void __do_suspend(void)
 
     for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
     {
-        pfn_to_mfn_frame_list[j] = 
-            virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
+        __vms_pfn_to_mfn_frame_list[j] = 
+            __vms_virt_to_machine(&__vms_phys_to_machine_mapping[i]) >> PAGE_SHIFT;
     }
     HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
-        virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
+        __vms_virt_to_machine(__vms_pfn_to_mfn_frame_list) >> PAGE_SHIFT;
 
 
     irq_resume();
index ad951fa9847493d832841086f66b32c64e539bf9..901df5f120e6e150b37b4e1ee1f9516b98a78e30 100644 (file)
@@ -206,11 +206,11 @@ static void balloon_process(void *unused)
                 BUG();
 
             pfn = page - mem_map;
-            if ( phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY )
+            if ( __vms_phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY )
                 BUG();
 
             /* Update P->M and M->P tables. */
-            phys_to_machine_mapping[pfn] = mfn_list[i];
+            __vms_phys_to_machine_mapping[pfn] = mfn_list[i];
             queue_machphys_update(mfn_list[i], pfn);
             
             /* Link back into the page tables if it's not a highmem page. */
@@ -244,8 +244,8 @@ static void balloon_process(void *unused)
             }
 
             pfn = page - mem_map;
-            mfn_list[i] = phys_to_machine_mapping[pfn];
-            phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
+            mfn_list[i] = __vms_phys_to_machine_mapping[pfn];
+            __vms_phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
 
             if ( !PageHighMem(page) )
             {
index 4f74a1c5142d95979b1fe095647cf75b419a0740..a5811384f6f5b5bac098e1f241ece907ddeb9de5 100644 (file)
@@ -444,7 +444,7 @@ static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
 #else
         mcl[i].args[3] = blkif->domid;
 #endif
-        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
+        __vms_phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
             FOREIGN_FRAME(phys_seg[i].buffer >> PAGE_SHIFT);
     }
 
index 65c7011e76dce839942e963731086af041fb869a..50086b631f60dded12c002cde0a75389ccaac211 100644 (file)
@@ -129,7 +129,7 @@ static inline void translate_req_to_pfn(blkif_request_t *xreq,
     xreq->sector_number = req->sector_number;
 
     for ( i = 0; i < req->nr_segments; i++ )
-        xreq->frame_and_sects[i] = machine_to_phys(req->frame_and_sects[i]);
+        xreq->frame_and_sects[i] = __vms_machine_to_phys(req->frame_and_sects[i]);
 }
 
 static inline void translate_req_to_mfn(blkif_request_t *xreq,
@@ -144,7 +144,7 @@ static inline void translate_req_to_mfn(blkif_request_t *xreq,
     xreq->sector_number = req->sector_number;
 
     for ( i = 0; i < req->nr_segments; i++ )
-        xreq->frame_and_sects[i] = phys_to_machine(req->frame_and_sects[i]);
+        xreq->frame_and_sects[i] = __vms_phys_to_machine(req->frame_and_sects[i]);
 }
 
 
@@ -1091,7 +1091,7 @@ static void blkif_send_interface_connect(void)
     blkif_fe_interface_connect_t *msg = (void*)cmsg.msg;
     
     msg->handle      = 0;
-    msg->shmem_frame = (virt_to_machine(blk_ring.sring) >> PAGE_SHIFT);
+    msg->shmem_frame = (__vms_virt_to_machine(blk_ring.sring) >> PAGE_SHIFT);
     
     ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
 }
@@ -1401,7 +1401,7 @@ void blkif_completion(blkif_request_t *req)
         for ( i = 0; i < req->nr_segments; i++ )
         {
             unsigned long pfn = req->frame_and_sects[i] >> PAGE_SHIFT;
-            unsigned long mfn = phys_to_machine_mapping[pfn];
+            unsigned long mfn = __vms_phys_to_machine_mapping[pfn];
             xen_machphys_update(mfn, pfn);
         }
         break;
index 8050e756ca6b800a9a53ca7ef1f6ca2cb8b106b7..91b7ac8366051423c0fb7b541814fa618ac92e83 100644 (file)
@@ -112,7 +112,7 @@ static int xlvbd_get_vbd_info(vdisk_t *disk_info)
     memset(&req, 0, sizeof(req));
     req.operation   = BLKIF_OP_PROBE;
     req.nr_segments = 1;
-    req.frame_and_sects[0] = virt_to_machine(buf) | 7;
+    req.frame_and_sects[0] = __vms_virt_to_machine(buf) | 7;
 
     blkif_control_send(&req, &rsp);
 
index 71a3422129fe7914528b17af6719a493e454ee4c..85c2357040a1d69e2f90ac5a9de3062b0736f0ff 100644 (file)
@@ -207,7 +207,7 @@ static void net_rx_action(unsigned long unused)
     {
         netif   = netdev_priv(skb->dev);
         vdata   = (unsigned long)skb->data;
-        mdata   = virt_to_machine(vdata);
+        mdata   = __vms_virt_to_machine(vdata);
 
         /* Memory squeeze? Back off for an arbitrary while. */
         if ( (new_mfn = alloc_mfn()) == 0 )
@@ -223,7 +223,7 @@ static void net_rx_action(unsigned long unused)
          * Set the new P2M table entry before reassigning the old data page.
          * Heed the comment in pgtable-2level.h:pte_page(). :-)
          */
-        phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
+        __vms_phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
         
         mmu[0].ptr  = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
         mmu[0].val  = __pa(vdata) >> PAGE_SHIFT;  
@@ -590,7 +590,7 @@ static void net_tx_action(unsigned long unused)
             continue;
         }
 
-        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
+        __vms_phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
             FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
 
         data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;
index 89154d457698af8f8fdab6ea01e777a457d9bc00..bf84ac2f45ee7d47cd5ba6e111da6406a45ff41c 100644 (file)
@@ -375,10 +375,10 @@ static void network_alloc_rx_buffers(struct net_device *dev)
         
         np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
         
-        rx_pfn_array[i] = virt_to_machine(skb->head) >> PAGE_SHIFT;
+        rx_pfn_array[i] = __vms_virt_to_machine(skb->head) >> PAGE_SHIFT;
 
        /* Remove this page from pseudo phys map before passing back to Xen. */
-       phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] 
+       __vms_phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] 
            = INVALID_P2M_ENTRY;
 
         rx_mcl[i].op = __HYPERVISOR_update_va_mapping;
@@ -464,7 +464,7 @@ static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
     tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
 
     tx->id   = id;
-    tx->addr = virt_to_machine(skb->data);
+    tx->addr = __vms_virt_to_machine(skb->data);
     tx->size = skb->len;
 
     wmb(); /* Ensure that backend will see the request. */
@@ -579,7 +579,7 @@ static int netif_poll(struct net_device *dev, int *pbudget)
         mcl->args[2] = 0;
         mcl++;
 
-        phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = 
+        __vms_phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = 
             rx->addr >> PAGE_SHIFT;
 
         __skb_queue_tail(&rxq, skb);
@@ -736,7 +736,7 @@ static void network_connect(struct net_device *dev,
                 tx = &np->tx->ring[requeue_idx++].req;
                 
                 tx->id   = i;
-                tx->addr = virt_to_machine(skb->data);
+                tx->addr = __vms_virt_to_machine(skb->data);
                 tx->size = skb->len;
                 
                 np->stats.tx_bytes += skb->len;
@@ -799,8 +799,8 @@ static void send_interface_connect(struct net_private *np)
     netif_fe_interface_connect_t *msg = (void*)cmsg.msg;
 
     msg->handle = np->handle;
-    msg->tx_shmem_frame = (virt_to_machine(np->tx) >> PAGE_SHIFT);
-    msg->rx_shmem_frame = (virt_to_machine(np->rx) >> PAGE_SHIFT);
+    msg->tx_shmem_frame = (__vms_virt_to_machine(np->tx) >> PAGE_SHIFT);
+    msg->rx_shmem_frame = (__vms_virt_to_machine(np->rx) >> PAGE_SHIFT);
         
     ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
 }
index c97fe7cf21125f19a820dc4532048e88def271b2..d90269f4865eb7f1a280ed810d8e310eab5ea766 100644 (file)
@@ -174,7 +174,7 @@ static int privcmd_ioctl(struct inode *inode, struct file *file,
 
     case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN:
     {
-        unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
+        unsigned long m2pv = (unsigned long)__vms_machine_to_phys_mapping;
         pgd_t *pgd = pgd_offset_k(m2pv);
         pmd_t *pmd = pmd_offset(pgd, m2pv);
         unsigned long m2p_start_mfn = pmd_val(*pmd) >> PAGE_SHIFT;
index 3cebc41697cab2715f509baf70448a89595c4090..1aaac4e1d7191403c70323d8fcecadfeb469eae1 100644 (file)
@@ -89,7 +89,7 @@ static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
 
 static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
 {
-#define C(i) HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), ((u32 *)&t->tls_array[i])[0], ((u32 *)&t->tls_array[i])[1])
+#define C(i) HYPERVISOR_update_descriptor(__pa(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), ((u32 *)&t->tls_array[i])[0], ((u32 *)&t->tls_array[i])[1])
        C(0); C(1); C(2);
 #undef C
 }
index cf12d309e3b57e639cd750be63a32e4b0ad9f706..ac965e283c8ede718eb71ea9dadfa8e62cbd0516 100644 (file)
@@ -101,15 +101,15 @@ enum fixed_addresses {
 
 extern void __set_fixmap (enum fixed_addresses idx,
                                        unsigned long phys, pgprot_t flags);
-extern void __set_fixmap_ma (enum fixed_addresses idx,
+extern void __vms___set_fixmap_ma (enum fixed_addresses idx,
                                        unsigned long mach, pgprot_t flags);
 
 #define set_fixmap(idx, phys) \
                __set_fixmap(idx, phys, PAGE_KERNEL)
-#define set_fixmap_ma(idx, phys) \
-               __set_fixmap_ma(idx, phys, PAGE_KERNEL)
-#define set_fixmap_ma_ro(idx, phys) \
-               __set_fixmap_ma(idx, phys, PAGE_KERNEL_RO)
+#define __vms_set_fixmap_ma(idx, phys) \
+               __vms___set_fixmap_ma(idx, phys, PAGE_KERNEL)
+#define __vms_set_fixmap_ma_ro(idx, phys) \
+               __vms___set_fixmap_ma(idx, phys, PAGE_KERNEL_RO)
 /*
  * Some hardware wants to get fixmapped without caching.
  */
index 2fa9f47cccd50c1191d6496012fa39041583c4a4..e216ea64e1a66b504278f8ccd03ee09270e65539 100644 (file)
@@ -89,18 +89,21 @@ static inline void * phys_to_virt(unsigned long address)
 /*
  * Change "struct page" to physical address.
  */
-#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-#define page_to_phys(page)      (phys_to_machine(page_to_pseudophys(page)))
+#define __vms_page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+#define __vms_page_to_machphys(page) (__vms_phys_to_machine(__vms_page_to_pseudophys(page)))
+#define page_to_phys(page)      (__vms_page_to_machphys(page))
 
-#define bio_to_pseudophys(bio)  (page_to_pseudophys(bio_page((bio))) + \
+#define __vms_bio_to_pseudophys(bio)    (__vms_page_to_pseudophys(bio_page((bio))) + \
                                  (unsigned long) bio_offset((bio)))
-#define bvec_to_pseudophys(bv)  (page_to_pseudophys((bv)->bv_page) + \
+#define __vms_bvec_to_pseudophys(bv)    (__vms_page_to_pseudophys((bv)->bv_page) + \
+                                 (unsigned long) (bv)->bv_offset)
+#define __vms_bvec_to_machphys(bv)      (__vms_page_to_machphys((bv)->bv_page) + \
                                  (unsigned long) (bv)->bv_offset)
 
 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2)      \
        (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
-        ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
-         bvec_to_pseudophys((vec2))))
+        ((__vms_bvec_to_machphys((vec1)) + (vec1)->bv_len) == \
+         __vms_bvec_to_machphys((vec2))))
 
 extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
 
@@ -149,8 +152,8 @@ extern void bt_iounmap(void *addr, unsigned long size);
  *
  * Allow them on x86 for legacy drivers, though.
  */
-#define virt_to_bus(_x) phys_to_machine(__pa(_x))
-#define bus_to_virt(_x) __va(machine_to_phys(_x))
+#define virt_to_bus(_x) __vms_phys_to_machine(__pa(_x))
+#define bus_to_virt(_x) ({ BUG(); __va((_x)); })
 
 /*
  * readX/writeX() are used to access memory mapped devices. On some
index 6215a5b01868288a1e4290b786776d8be17b553a..40bda3c2d41a73eedad9f589a4fa873159911fe3 100644 (file)
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
+#ifndef BUG
+#include <asm/bug.h>
+#endif
+
 #include <linux/config.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
 
 /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
-extern unsigned int *phys_to_machine_mapping;
-#define pfn_to_mfn(_pfn) ((unsigned long)(phys_to_machine_mapping[(_pfn)]))
-#define mfn_to_pfn(_mfn) ((unsigned long)(machine_to_phys_mapping[(_mfn)]))
-static inline unsigned long phys_to_machine(unsigned long phys)
+extern unsigned int *__vms_phys_to_machine_mapping;
+#define __vms_pfn_to_mfn(_pfn) ((unsigned long)(__vms_phys_to_machine_mapping[(_pfn)]))
+#define __vms_mfn_to_pfn(_mfn) ({ BUG(); ((unsigned long)(__vms_machine_to_phys_mapping[(_mfn)])); })
+static inline unsigned long __vms_phys_to_machine(unsigned long phys)
 {
-       unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
+       unsigned long machine = __vms_pfn_to_mfn(phys >> PAGE_SHIFT);
        machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
        return machine;
 }
-static inline unsigned long machine_to_phys(unsigned long machine)
+static inline unsigned long __vms_machine_to_phys(unsigned long machine)
 {
-       unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT);
+       unsigned long phys = __vms_mfn_to_pfn(machine >> PAGE_SHIFT);
        phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
        return phys;
 }
@@ -89,9 +93,8 @@ typedef struct { unsigned long pmd; } pmd_t;
 typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 #define boot_pte_t pte_t /* or would you rather have a typedef */
-#define pte_val(x)     (((x).pte_low & 1) ? machine_to_phys((x).pte_low) : \
-                        (x).pte_low)
-#define pte_val_ma(x)  ((x).pte_low)
+#define pte_val(x)     ((x).pte_low)
+#define __vms_pte_val_ma(x)    ((x).pte_low)
 #define HPAGE_SHIFT    22
 #endif
 #define PTE_MASK       PAGE_MASK
@@ -106,22 +109,17 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 
 static inline unsigned long pmd_val(pmd_t x)
 {
-       unsigned long ret = x.pmd;
-       if (ret) ret = machine_to_phys(ret);
-       return ret;
+       return x.pmd;
 }
 #define pgd_val(x)     ({ BUG(); (unsigned long)0; })
 #define pgprot_val(x)  ((x).pgprot)
 
 static inline pte_t __pte(unsigned long x)
 {
-       if (x & 1) x = phys_to_machine(x);
        return ((pte_t) { (x) });
 }
-#define __pte_ma(x)    ((pte_t) { (x) } )
 static inline pmd_t __pmd(unsigned long x)
 {
-       if ((x & 1)) x = phys_to_machine(x);
        return ((pmd_t) { (x) });
 }
 #define __pgd(x)       ({ BUG(); (pgprot_t) { 0 }; })
@@ -199,8 +197,8 @@ extern int sysctl_legacy_va_layout;
                 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 /* VIRT <-> MACHINE conversion */
-#define virt_to_machine(_a)    (phys_to_machine(__pa(_a)))
-#define machine_to_virt(_m)    (__va(machine_to_phys(_m)))
+#define __vms_virt_to_machine(_a)      (__vms_phys_to_machine(__pa(_a)))
+#define __vms_machine_to_virt(_m)      (__va(__vms_machine_to_phys(_m)))
 
 #endif /* __KERNEL__ */
 
index e9bf5f50b5161a5805f46e7c5c54630bafd5e481..406bed09b63d867108808c48bb1da9fbc782296e 100644 (file)
@@ -16,7 +16,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p
        set_pmd(pmd, __pmd(_PAGE_TABLE +
                ((unsigned long long)page_to_pfn(pte) <<
                        (unsigned long long) PAGE_SHIFT)));
-       flush_page_update_queue();
 }
 /*
  * Allocate and free page tables.
@@ -31,8 +30,6 @@ extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
 static inline void pte_free_kernel(pte_t *pte)
 {
        free_page((unsigned long)pte);
-       make_page_writable(pte);
-       flush_page_update_queue();
 }
 
 extern void pte_free(struct page *pte);
index 3abd3ce14a56a5417b1092e4caf9a32802cb39b5..af90f6b2089f72d63ef8a952fcce381f1bf2d067 100644 (file)
@@ -43,7 +43,7 @@ do { \
  * (pmds are folded into pgds so this doesn't get actually called,
  * but the define is needed for a generic inline function.)
  */
-#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval).pmd)
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
 #define set_pgd(pgdptr, pgdval) ((void)0)
 
 #define pgd_page(pgd) \
@@ -67,7 +67,7 @@ static inline pte_t ptep_get_and_clear(pte_t *xp)
 {
        pte_t pte = *xp;
        if (pte.pte_low)
-               set_pte(xp, __pte_ma(0));
+               set_pte(xp, __pte(0));
        return pte;
 }
 
@@ -94,20 +94,12 @@ static inline pte_t ptep_get_and_clear(pte_t *xp)
  */
 #define INVALID_P2M_ENTRY (~0U)
 #define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
-#define pte_pfn(_pte)                                                  \
-({                                                                     \
-       unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT;               \
-       unsigned long pfn = mfn_to_pfn(mfn);                            \
-       if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn))             \
-               pfn = max_mapnr; /* special: force !pfn_valid() */      \
-       pfn;                                                            \
-})
+#define pte_pfn(_pte)          ((_pte).pte_low >> PAGE_SHIFT)
 
 #define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
 
 #define pte_none(x)            (!(x).pte_low)
 #define pfn_pte(pfn, prot)     __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define pfn_pte_ma(pfn, prot)  __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 #define pfn_pmd(pfn, prot)     __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 
 /*
index 69e0fbf0ba9a2d4ee9c599377f662687aadfc475..f9f3c9ddd06b97ed5ed4e4d09c7c0e01733e06c6 100644 (file)
@@ -321,7 +321,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 #define pmd_clear(xp)  do {                                    \
        set_pmd(xp, __pmd(0));                                  \
-       xen_flush_page_update_queue();                          \
 } while (0)
 
 #ifndef CONFIG_DISCONTIGMEM
@@ -460,13 +459,22 @@ void make_page_writable(void *va);
 void make_pages_readonly(void *va, unsigned int nr);
 void make_pages_writable(void *va, unsigned int nr);
 
-#define arbitrary_virt_to_machine(__va)                                        \
+#define __vms_arbitrary_virt_to_machine(__va)                                  \
 ({                                                                     \
        pgd_t *__pgd = pgd_offset_k((unsigned long)(__va));             \
        pmd_t *__pmd = pmd_offset(__pgd, (unsigned long)(__va));        \
        pte_t *__pte = pte_offset_kernel(__pmd, (unsigned long)(__va)); \
        unsigned long __pa = (*(unsigned long *)__pte) & PAGE_MASK;     \
-       __pa | ((unsigned long)(__va) & (PAGE_SIZE-1));                 \
+       __vms_phys_to_machine(__pa) | ((unsigned long)(__va) & (PAGE_SIZE-1)); \
+})
+
+#define arbitrary_virt_to_phys(__va)                                   \
+({                                                                     \
+       pgd_t *__pgd = pgd_offset_k((unsigned long)(__va));             \
+       pmd_t *__pmd = pmd_offset(__pgd, (unsigned long)(__va));        \
+       pte_t *__pte = pte_offset_kernel(__pmd, (unsigned long)(__va)); \
+       unsigned long __pa = (*(unsigned long *)__pte) & PAGE_MASK;     \
+       (__pa) | ((unsigned long)(__va) & (PAGE_SIZE-1));               \
 })
 
 #endif /* !__ASSEMBLY__ */
index 8f3c1477dc3e3688c52f541170e99d97b61e1080..06015a44f347f0adda053da8b6ec7a1ebd07c7d7 100644 (file)
@@ -70,8 +70,8 @@
  * machine->physical mapping table starts at this address, read-only.
  */
 #define HYPERVISOR_VIRT_START (0xFC000000UL)
-#ifndef machine_to_phys_mapping
-#define machine_to_phys_mapping ((u32 *)HYPERVISOR_VIRT_START)
+#ifndef __vms_machine_to_phys_mapping
+#define __vms_machine_to_phys_mapping ((u32 *)HYPERVISOR_VIRT_START)
 #endif
 
 #ifndef __ASSEMBLY__